bitkeeper revision 1.1159.272.8 (4240716dixo5jLBihZPvbRrP21dn4g)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 22 Mar 2005 19:26:37 +0000 (19:26 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 22 Mar 2005 19:26:37 +0000 (19:26 +0000)
Schedule page scrubbing for dead domains off the per-cpu periodic
ticker. We use up to 10% of each busy CPU's time and all idle CPU time.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/domain.c
xen/common/page_alloc.c
xen/common/schedule.c
xen/include/xen/mm.h
xen/include/xen/softirq.h

index 9c81c9c7182605464425e9db1e9d85daef6b62c3..812d14c5062ad185447a48e6b0451774cfeb0165 100644 (file)
@@ -69,7 +69,10 @@ static __attribute_used__ void idle_loop(void)
     {
         irq_stat[cpu].idle_timestamp = jiffies;
         while ( !softirq_pending(cpu) )
+        {
+            page_scrub_schedule_work();
             default_idle();
+        }
         do_softirq();
     }
 }
index d8ac2d4ca05a5188f6aa7ce30885fe4e8f685d41..8637b8cea5cd85d1880815bbae66500b42220160 100644 (file)
@@ -28,6 +28,7 @@
 #include <xen/spinlock.h>
 #include <xen/slab.h>
 #include <xen/irq.h>
+#include <xen/softirq.h>
 #include <asm/domain_page.h>
 
 /*
@@ -551,7 +552,6 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
 {
     int            i, drop_dom_ref;
     struct domain *d = pg->u.inuse.domain;
-    void          *p;
 
     ASSERT(!in_irq());
 
@@ -579,26 +579,31 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
             pg[i].tlbflush_timestamp  = tlbflush_current_time();
             pg[i].u.free.cpu_mask     = 1 << d->processor;
             list_del(&pg[i].list);
+        }
+
+        d->tot_pages -= 1 << order;
+        drop_dom_ref = (d->tot_pages == 0);
+
+        spin_unlock_recursive(&d->page_alloc_lock);
 
+        if ( likely(!test_bit(DF_DYING, &d->flags)) )
+        {
+            free_heap_pages(MEMZONE_DOM, pg, order);
+        }
+        else
+        {
             /*
              * Normally we expect a domain to clear pages before freeing them,
              * if it cares about the secrecy of their contents. However, after
              * a domain has died we assume responsibility for erasure.
              */
-            if ( unlikely(test_bit(DF_DYING, &d->flags)) )
+            for ( i = 0; i < (1 << order); i++ )
             {
-                p = map_domain_mem(page_to_phys(&pg[i]));
-                clear_page(p);
-                unmap_domain_mem(p);
+                spin_lock(&page_scrub_lock);
+                list_add(&pg[i].list, &page_scrub_list);
+                spin_unlock(&page_scrub_lock);
             }
         }
-
-        d->tot_pages -= 1 << order;
-        drop_dom_ref = (d->tot_pages == 0);
-
-        spin_unlock_recursive(&d->page_alloc_lock);
-
-        free_heap_pages(MEMZONE_DOM, pg, order);
     }
     else
     {
@@ -616,3 +621,63 @@ unsigned long avail_domheap_pages(void)
 {
     return avail[MEMZONE_DOM];
 }
+
+
+
+/*************************
+ * PAGE SCRUBBING
+ */
+
+static spinlock_t page_scrub_lock;
+struct list_head page_scrub_list;
+
+static void page_scrub_softirq(void)
+{
+    struct list_head *ent;
+    struct pfn_info  *pg;
+    void             *p;
+    int               i;
+    s_time_t          start = NOW();
+
+    /* Aim to do 1ms of work (ten percent of a 10ms jiffy). */
+    do {
+        spin_lock(&page_scrub_lock);
+
+        if ( unlikely((ent = page_scrub_list.next) == &page_scrub_list) )
+        {
+            spin_unlock(&page_scrub_lock);
+            return;
+        }
+        
+        /* Peel up to 16 pages from the list. */
+        for ( i = 0; i < 16; i++ )
+            if ( (ent = ent->next) == &page_scrub_list )
+                break;
+        
+        /* Remove peeled pages from the list. */
+        ent->next->prev = &page_scrub_list;
+        page_scrub_list.next = ent->next;
+        
+        spin_unlock(&page_scrub_lock);
+        
+        /* Working backwards, scrub each page in turn. */
+        while ( ent != &page_scrub_list )
+        {
+            pg = list_entry(ent, struct pfn_info, list);
+            ent = ent->prev;
+            p = map_domain_mem(page_to_phys(pg));
+            clear_page(p);
+            unmap_domain_mem(p);
+            free_heap_pages(MEMZONE_DOM, pg, 0);
+        }
+    } while ( (NOW() - start) < MILLISECS(1) );
+}
+
+static __init int page_scrub_init(void)
+{
+    spin_lock_init(&page_scrub_lock);
+    INIT_LIST_HEAD(&page_scrub_list);
+    open_softirq(PAGE_SCRUB_SOFTIRQ, page_scrub_softirq);
+    return 0;
+}
+__initcall(page_scrub_init);
index 62f764e9d9aa73675c943994b5a69f6ebe449b26..d16c2192f4dc8ffe8860fec5b3849283cfb235d5 100644 (file)
@@ -437,6 +437,8 @@ static void t_timer_fn(unsigned long unused)
     if ( !is_idle_task(d) && update_dom_time(d) )
         send_guest_virq(d, VIRQ_TIMER);
 
+    page_scrub_schedule_work();
+
     t_timer[d->processor].expires = NOW() + MILLISECS(10);
     add_ac_timer(&t_timer[d->processor]);
 }
index 12242ca4915a2c392bf445525187af61fe539b0a..33ce8ac603f48f8c419493486f795a786e164634 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef __XEN_MM_H__
 #define __XEN_MM_H__
 
+#include <xen/config.h>
+#include <xen/list.h>
+#include <xen/spinlock.h>
+
 struct domain;
 struct pfn_info;
 
@@ -34,6 +38,15 @@ unsigned long avail_domheap_pages(void);
 #define alloc_domheap_page(_d) (alloc_domheap_pages(_d,0))
 #define free_domheap_page(_p) (free_domheap_pages(_p,0))
 
+/* Automatic page scrubbing for dead domains. */
+extern spinlock_t page_scrub_lock;
+extern struct list_head page_scrub_list;
+#define page_scrub_schedule_work()              \
+    do {                                        \
+        if ( !list_empty(&page_scrub_list) )    \
+            raise_softirq(PAGE_SCRUB_SOFTIRQ);  \
+    } while ( 0 )
+
 #include <asm/mm.h>
 
 #endif /* __XEN_MM_H__ */
index 6180dae775f0ffa9555d2a272f3ff168e1691492..a538540247b0de7bc6cd92db32f22f2814f64a8a 100644 (file)
@@ -7,7 +7,8 @@
 #define NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ 2
 #define KEYPRESS_SOFTIRQ                  3
 #define NMI_SOFTIRQ                       4
-#define NR_SOFTIRQS                       5
+#define PAGE_SCRUB_SOFTIRQ                5
+#define NR_SOFTIRQS                       6
 
 #ifndef __ASSEMBLY__